Prelims

Load packages:

library(tidyverse)
library(brms)
library(chron)

R and package versions for reporting and reproducibility:

R.Version()
## $platform
## [1] "x86_64-apple-darwin17.0"
## 
## $arch
## [1] "x86_64"
## 
## $os
## [1] "darwin17.0"
## 
## $system
## [1] "x86_64, darwin17.0"
## 
## $status
## [1] ""
## 
## $major
## [1] "4"
## 
## $minor
## [1] "0.2"
## 
## $year
## [1] "2020"
## 
## $month
## [1] "06"
## 
## $day
## [1] "22"
## 
## $`svn rev`
## [1] "78730"
## 
## $language
## [1] "R"
## 
## $version.string
## [1] "R version 4.0.2 (2020-06-22)"
## 
## $nickname
## [1] "Taking Off Again"
packageVersion('tidyverse')
## [1] '1.3.0'
packageVersion('brms')
## [1] '2.14.4'

Load data:

# Read the two coded datasets (gesture frequency/size and
# path/manner/ground/viewpoint); paths are relative to the scripts folder.
freq <- read_csv('../data/frequency_size.csv')
path <- read_csv('../data/path_manner_ground_viewpoint.csv')

Show:

freq
## # A tibble: 54 x 10
##    Language Condition Participant Gender Total_Freq Vertical_Big Lateral_Big
##    <chr>    <chr>           <dbl> <chr>       <dbl>        <dbl>       <dbl>
##  1 Korean   Friend              1 F              91           75          30
##  2 Korean   Friend              2 M              62           20           5
##  3 Korean   Friend              3 F              89           67          28
##  4 Korean   Friend              4 F              30           28          13
##  5 Korean   Friend              5 M              33           24           3
##  6 Korean   Friend              6 F              26           25           1
##  7 Korean   Friend              7 F              63           52          32
##  8 Korean   Friend              8 M              51           44          14
##  9 Korean   Friend              9 M              67           66          15
## 10 Korean   Friend             10 M              61           51          33
## # … with 44 more rows, and 3 more variables: Sagittal_Big <dbl>,
## #   Both_Hands <dbl>, Shape_Open <dbl>
path
## # A tibble: 54 x 11
##    Language Condition    ID Gender TotalEvents  Path Manner Ground Character
##    <chr>    <chr>     <dbl> <chr>        <dbl> <dbl>  <dbl>  <dbl>     <dbl>
##  1 Korean   Friend        1 F               16    13     12      3         8
##  2 Korean   Friend        2 M               14     8     10      0         8
##  3 Korean   Friend        3 F               15    12     12      4         5
##  4 Korean   Friend        4 F                7     6      6      2         4
##  5 Korean   Friend        5 M                8     7      5      0         3
##  6 Korean   Friend        6 F               13    11      9      3         6
##  7 Korean   Friend        7 F               10     7      7      4         5
##  8 Korean   Friend        8 M               13    10      9      3         4
##  9 Korean   Friend        9 M               15    12     10      5         8
## 10 Korean   Friend       10 M               13    10      7      2         5
## # … with 44 more rows, and 2 more variables: Observer <dbl>, Dual <dbl>

We can combine the two column-wise (they describe the same dyads in the same row order) into a new tibble called ‘dyads’:

dyads <- bind_cols(freq, select(path, TotalEvents:Dual))

Create a unique identifier by merging language with the participant number:

# Build a unique dyad identifier (e.g. "Korean_1") from language and
# participant number, and move the identifier columns to the front.
# The final select() already excludes the now-redundant Participant column.
dyads <- dyads %>%
  mutate(ID = str_c(Language, '_', Participant)) %>%
  select(ID, Language, Condition, Gender, Total_Freq:Dual)

Sort by ID so that polite/informal are next to each other:

# Order rows so the two conditions of each dyad sit on adjacent rows.
dyads <- dyads %>%
  arrange(ID, Condition)

Check one more time:

dyads
## # A tibble: 54 x 17
##    ID    Language Condition Gender Total_Freq Vertical_Big Lateral_Big
##    <chr> <chr>    <chr>     <chr>       <dbl>        <dbl>       <dbl>
##  1 Cata… Catalan  Friend    M              61           39          19
##  2 Cata… Catalan  Superior  M              78           60          13
##  3 Cata… Catalan  Friend    F              61           46          12
##  4 Cata… Catalan  Superior  F              73           42           7
##  5 Cata… Catalan  Friend    M              58           55          24
##  6 Cata… Catalan  Superior  M              51           43          14
##  7 Cata… Catalan  Friend    M             101           73          33
##  8 Cata… Catalan  Superior  M              82           52          31
##  9 Cata… Catalan  Friend    M              56           49          20
## 10 Cata… Catalan  Superior  M              74           62          19
## # … with 44 more rows, and 10 more variables: Sagittal_Big <dbl>,
## #   Both_Hands <dbl>, Shape_Open <dbl>, TotalEvents <dbl>, Path <dbl>,
## #   Manner <dbl>, Ground <dbl>, Character <dbl>, Observer <dbl>, Dual <dbl>

Add duration data

Load the duration data in:

# Conversation durations, one table per language: one row per dyad with a
# duration column per condition (Friend / Prof).
cat_dur <- read_csv('../data/catalan_durations.csv')
kor_dur <- read_csv('../data/korean_durations.csv')

Add the label info:

cat_dur
## # A tibble: 14 x 4
##    PPT_ID_Iris PPT_ID Friend Prof  
##    <chr>        <dbl> <time> <time>
##  1 1                1 02:17  02:16 
##  2 2                2 01:30  01:47 
##  3 3                3 03:01  02:45 
##  4 4                4 02:10  01:55 
##  5 5                5 02:02  02:05 
##  6 6                6 02:44  02:03 
##  7 7                7 02:34  02:11 
##  8 8                8 02:26  02:13 
##  9 9                9 02:30  02:17 
## 10 10 (my 11)      11 01:57  02:45 
## 11 11 (my 12)      12 02:14  02:12 
## 12 12 (my 13)      13 03:00  02:53 
## 13 13 (my 14)      14 02:40  02:52 
## 14 14 (my 16)      16 01:11  01:19
kor_dur
## # A tibble: 13 x 3
##    PPT_ID Friend Prof  
##     <dbl> <time> <time>
##  1      1 02:32  02:18 
##  2      2 02:24  02:19 
##  3      3 03:07  02:18 
##  4      4 01:07  01:15 
##  5      5 02:07  01:38 
##  6      6 01:27  00:58 
##  7      7 02:20  02:12 
##  8      8 01:56  01:43 
##  9      9 02:26  01:46 
## 10     10 02:55  02:45 
## 11     11 01:23  01:26 
## 12     12 02:28  01:21 
## 13     13 02:17  01:18

Create unique identifiers:

# Prefix participant numbers with the language so IDs match the main table.
cat_dur <- cat_dur %>%
  mutate(ID = str_c('Catalan_', PPT_ID))
kor_dur <- kor_dur %>%
  mutate(ID = str_c('Korean_', PPT_ID))

Append:

# Stack the two language tables, keeping only ID and the duration columns.
durs <- select(cat_dur, ID, Friend, Prof) %>%
  bind_rows(select(kor_dur, ID, Friend, Prof))

Make this into long format:

# Reshape to one row per dyad-and-condition, durations in a single column.
durs <- durs %>%
  pivot_longer(cols = Friend:Prof,
               names_to = 'Condition',
               values_to = 'Duration')

Transform times to seconds:

# Convert durations to seconds. chron::times() stores the value as a
# fraction of a day, so * 60 * 24 nominally yields minutes.
# NOTE(review): the raw durations are mm:ss but appear to be handled as
# hh:mm, so the resulting "minutes" equal the true number of seconds
# (e.g. 02:17 -> 137, matching the output below). This is fragile —
# verify if the input format ever changes.
durs <- mutate(durs,
               Seconds = times(Duration) * 60 * 24)

Check:

durs
## # A tibble: 54 x 4
##    ID        Condition Duration Seconds
##    <chr>     <chr>     <time>   <times>
##  1 Catalan_1 Friend    02:17    137    
##  2 Catalan_1 Prof      02:16    136    
##  3 Catalan_2 Friend    01:30     90    
##  4 Catalan_2 Prof      01:47    107    
##  5 Catalan_3 Friend    03:01    181    
##  6 Catalan_3 Prof      02:45    165    
##  7 Catalan_4 Friend    02:10    130    
##  8 Catalan_4 Prof      01:55    115    
##  9 Catalan_5 Friend    02:02    122    
## 10 Catalan_5 Prof      02:05    125    
## # … with 44 more rows

Make the labels the same as in the main table:

# Rename the 'Prof' condition to 'Superior' so labels match the main table.
durs <- durs %>%
  mutate(Condition = if_else(Condition == 'Prof', 'Superior', Condition))

Merge the two:

# Attach the seconds to the main table; right_join keeps every dyad row.
dyads <- right_join(select(durs, -Duration), dyads,
                    by = c('ID', 'Condition'))

Convert the chron ‘times’ column into a plain numeric vector:

# Strip the chron 'times' class so Seconds is a plain numeric vector
# (plays more predictably with dplyr summaries and brms below).
dyads <- mutate(dyads,
                Seconds = as.vector(Seconds))

Data visualization

Create a dataset with the average rate per speaker:

# Per-speaker gesture rate (gestures per second) in each condition.
# Each ID/Condition cell holds a single row, so mean() just collapses
# the grouped tibble to one row per cell.
avgs <- dyads %>%
  group_by(ID, Condition, Language) %>%
  summarize(rate_M = mean(Total_Freq / Seconds))
## `summarise()` has grouped output by 'ID', 'Condition'. You can override using the `.groups` argument.

Make a variable that codes for whether they go down or not:

# Compute differences:

# diff() relies on rows being ordered Friend before Superior within each
# ID, which summarize() guarantees (group keys are sorted), so `diff` is
# Superior minus Friend.
diffs <- avgs %>% group_by(ID) %>% 
  summarize(diff = diff(rate_M)) %>% 
  mutate(trend = ifelse(diff < 0, 'down', 'not down'))

# Add to average data frame (explicit key silences the join message):

avgs <- left_join(avgs, diffs, by = 'ID')
## Joining, by = "ID"

Plot this:

# Spaghetti plot of per-dyad gesture rates: one line per dyad connecting
# its two conditions, faceted by language. Line color encodes whether the
# rate dropped in the Superior condition.
rate_p <- avgs %>% ggplot(aes(x = Condition, y = rate_M,
                       group = ID,
                       fill = Condition)) +
  geom_line(aes(col = trend)) +
  # 'down' = black, 'not down' = grey (discrete levels in alphabetical order)
  scale_color_manual(values = c('black', 'grey')) +
  geom_point(size = 3, shape = 21,
             alpha = 0.85) +
  # colorblind-friendly orange/blue fills for the two conditions
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  xlab('') +
  ylab('Gestures per second') +
  theme_minimal() +
  theme(legend.position = 'none') +
  theme(axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16)) +
  facet_wrap(~Language)

# Save:

rate_p

# Write the gesture-rate figure to the repo's figures folder (6 x 4 in).
ggsave(plot = rate_p, filename = '../figures/gesture_rate.pdf',
       width = 6, height = 4)

Settings for Bayesian analysis (same across all)

Settings for parallel processing:

options(mc.cores=parallel::detectCores())

Weakly informative priors:

my_priors <- c(prior('normal(0, 2)', class = 'b'))

Control parameters for MCMC sampling:

# Sampler controls shared by all models: a high adapt_delta and deeper
# max_treedepth reduce divergent transitions at the cost of slower
# sampling. (Use `<-` for assignment, per the tidyverse style guide.)
my_controls <- list(adapt_delta = 0.999,
                    max_treedepth = 13)

Iterations for all chains:

# 6000 iterations per chain, the first 4000 discarded as warmup
# (=> 2000 post-warmup draws per chain; 8000 total across 4 chains).
my_iter <- 6000
my_warmup <- 4000

Analysis: Gesture frequency

Overall number of gestures per condition:

# Total gestures and summed duration per condition, with proportions and
# per-minute gesture rates.
dyads %>%
  group_by(Condition) %>%
  summarize(Freq = sum(Total_Freq), TotalDur = sum(Seconds)) %>%
  mutate(Prop = round(Freq / sum(Freq), 2),
         Percentage = str_c(Prop * 100, '%'),
         Rate = 60 * Freq / TotalDur) # gestures per minute
## # A tibble: 2 x 6
##   Condition  Freq TotalDur  Prop Percentage  Rate
## * <chr>     <dbl>    <dbl> <dbl> <chr>      <dbl>
## 1 Friend     1453     3645  0.54 54%         23.9
## 2 Superior   1217     3290  0.46 46%         22.2

Overall number of gestures per condition per language:

# Tally by language and condition:

# Gesture totals and summed durations per language-condition cell.
# summarize() drops only the innermost group, so the result stays grouped
# by Language — hence the `.groups` message below.
freq_count <- dyads %>% group_by(Language, Condition) %>% 
  summarize(Freq = sum(Total_Freq),
            Dur = sum(Seconds))
## `summarise()` has grouped output by 'Language'. You can override using the `.groups` argument.
# Take sums by language for calculating proportions, and calculate them:

# Per-language totals, joined back in to compute within-language
# proportions and per-minute rates. The explicit `by` makes the join key
# visible and silences the "Joining, by = ..." message.
freq_count <- freq_count %>% group_by(Language) %>% 
  summarize(Total = sum(Freq)) %>%
  right_join(freq_count, by = 'Language') %>% 
  mutate(Prop = Freq / Total,
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'),
         Rate = Freq / Dur * 60)
## Joining, by = "Language"
# Check:

freq_count
## # A tibble: 4 x 8
##   Language Total Condition  Freq   Dur  Prop Percentage  Rate
##   <chr>    <dbl> <chr>     <dbl> <dbl> <dbl> <chr>      <dbl>
## 1 Catalan   1498 Friend      764  1936  0.51 51%         23.7
## 2 Catalan   1498 Superior    734  1893  0.49 49%         23.3
## 3 Korean    1172 Friend      689  1709  0.59 59%         24.2
## 4 Korean    1172 Superior    483  1397  0.41 41%         20.7

Check total frequency by participant:

# One row per dyad with the two conditions' frequencies side by side;
# positive PoliteDiff means more gestures with the superior.
freqs <- dyads %>%
  select(Language, ID, Condition, Total_Freq) %>%
  pivot_wider(names_from = Condition,
              values_from = Total_Freq) %>%
  mutate(PoliteDiff = Superior - Friend)

# Check:

freqs %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1      61       78         17
##  2 Catalan  Catalan_2      31       40          9
##  3 Catalan  Catalan_3      81       49        -32
##  4 Catalan  Catalan_4      32       49         17
##  5 Catalan  Catalan_5      39       32         -7
##  6 Catalan  Catalan_6      35       30         -5
##  7 Catalan  Catalan_7      88       73        -15
##  8 Catalan  Catalan_8      59       47        -12
##  9 Catalan  Catalan_9      22       13         -9
## 10 Catalan  Catalan_11     61       73         12
## 11 Catalan  Catalan_12     58       51         -7
## 12 Catalan  Catalan_13    101       82        -19
## 13 Catalan  Catalan_14     56       74         18
## 14 Catalan  Catalan_16     40       43          3
## 15 Korean   Korean_1       91       76        -15
## 16 Korean   Korean_2       62       37        -25
## 17 Korean   Korean_3       89       49        -40
## 18 Korean   Korean_4       30       30          0
## 19 Korean   Korean_5       33       11        -22
## 20 Korean   Korean_6       26       13        -13
## 21 Korean   Korean_7       63       62         -1
## 22 Korean   Korean_8       51       27        -24
## 23 Korean   Korean_9       67       49        -18
## 24 Korean   Korean_10      61       61          0
## 25 Korean   Korean_11      31       16        -15
## 26 Korean   Korean_12      63       42        -21
## 27 Korean   Korean_13      22       10        -12

Very consistent picture: Korean participants never gesture more in the superior condition (fewer gestures for 11 of 13, ties for the other 2).

Compute for how many this is:

# Categorize the count difference. The original chained ifelse() calls
# coerced Category to character after the first step and then compared
# that character vector against 0, which only works by string-collation
# coincidence ("-7" < "0"). case_when() expresses the three cases
# directly and robustly, with identical results.
freqs <- freqs %>%
  mutate(Category = case_when(
    PoliteDiff > 0 ~ 'more polite',
    PoliteDiff < 0 ~ 'less polite',
    TRUE ~ 'same'
  ))

# Count:

freqs %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     8
## 2 Catalan  more polite     6
## 3 Korean   less polite    11
## 4 Korean   same            2

Add the rate information to that:

# NOTE(review): this overwrites the earlier long-format `durs` table with
# a wide per-dyad version (Friend/Superior seconds side by side); a fresh
# name would be clearer if the long table is ever needed again.
durs <- dyads %>% select(Language, ID, Condition, Seconds) %>% 
  pivot_wider(names_from = Condition, values_from = Seconds)

# Join counts and durations per dyad; the .x/.y suffixes from the
# duplicated Friend/Superior names are renamed to _N (counts) and
# _dur (seconds).
rates <- freqs %>%
  left_join(durs, by = c('Language', 'ID')) %>%
  rename(Friend_N = Friend.x, Superior_N = Superior.x,
         Friend_dur = Friend.y, Superior_dur = Superior.y)

Calculate the rate difference:

# Gestures per second in each condition; positive PoliteDiff = faster
# gesturing with the superior. Note this overwrites the count-based
# PoliteDiff carried over from `freqs`.
rates <- rates %>%
  mutate(Friend_rate = Friend_N / Friend_dur,
         Superior_rate = Superior_N / Superior_dur,
         PoliteDiff = Superior_rate - Friend_rate)

# Check:

rates %>% select(Language, ID, PoliteDiff) %>% print(n = Inf)
## # A tibble: 27 x 3
##    Language ID         PoliteDiff
##    <chr>    <chr>           <dbl>
##  1 Catalan  Catalan_1     0.128  
##  2 Catalan  Catalan_2     0.0294 
##  3 Catalan  Catalan_3    -0.151  
##  4 Catalan  Catalan_4     0.180  
##  5 Catalan  Catalan_5    -0.0637 
##  6 Catalan  Catalan_6     0.0305 
##  7 Catalan  Catalan_7    -0.0142 
##  8 Catalan  Catalan_8    -0.0507 
##  9 Catalan  Catalan_9    -0.0518 
## 10 Catalan  Catalan_11   -0.0789 
## 11 Catalan  Catalan_12   -0.0465 
## 12 Catalan  Catalan_13   -0.0871 
## 13 Catalan  Catalan_14    0.0802 
## 14 Catalan  Catalan_16   -0.0191 
## 15 Korean   Korean_1     -0.0480 
## 16 Korean   Korean_2     -0.164  
## 17 Korean   Korean_3     -0.121  
## 18 Korean   Korean_4     -0.0478 
## 19 Korean   Korean_5     -0.148  
## 20 Korean   Korean_6     -0.0747 
## 21 Korean   Korean_7      0.0197 
## 22 Korean   Korean_8     -0.178  
## 23 Korean   Korean_9      0.00336
## 24 Korean   Korean_10     0.0211 
## 25 Korean   Korean_11    -0.187  
## 26 Korean   Korean_12     0.0928 
## 27 Korean   Korean_13    -0.0324

Compute for how many the rate lowered / increased:

# NOTE(review): this section is about whether the duration-corrected RATE
# lowered or increased, but the original recomputed Category from `freqs`,
# whose PoliteDiff is the raw COUNT difference (and lumped the two ties
# into 'less polite'). Use the rate difference in `rates` instead; the
# counts will therefore differ from the stale rendered output below.
rates <- rates %>%
  mutate(Category = ifelse(PoliteDiff > 0, 'more polite', 'less polite'))

# Count:

rates %>% count(Language, Category)
## # A tibble: 3 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     8
## 2 Catalan  more polite     6
## 3 Korean   less polite    13

Make a plot of this. First, setup data frame for plotting:

# Long-format Korean rates for plotting, with reader-friendly condition
# labels (line breaks render as two-line axis labels).
freq_avg <- rates %>%
  # for Anna Borghi's conference:
  filter(Language == "Korean") %>%
  select(ID, Friend_rate, Superior_rate) %>%
  pivot_longer(Friend_rate:Superior_rate,
               names_to = 'Condition',
               values_to = 'rate') %>%
  mutate(Condition = if_else(Condition == 'Friend_rate',
                             'with\nfriend', 'with\nprofessor'))
                   
# Check:

freq_avg
## # A tibble: 26 x 3
##    ID       Condition          rate
##    <chr>    <chr>             <dbl>
##  1 Korean_1 "with\nfriend"    0.599
##  2 Korean_1 "with\nprofessor" 0.551
##  3 Korean_2 "with\nfriend"    0.431
##  4 Korean_2 "with\nprofessor" 0.266
##  5 Korean_3 "with\nfriend"    0.476
##  6 Korean_3 "with\nprofessor" 0.355
##  7 Korean_4 "with\nfriend"    0.448
##  8 Korean_4 "with\nprofessor" 0.4  
##  9 Korean_5 "with\nfriend"    0.260
## 10 Korean_5 "with\nprofessor" 0.112
## # … with 16 more rows

For the count model, we’ll work with the long format, but we’ll have to make language and condition into sum-coded factors to interpret the interaction:

# Make into factor:

dyads <- mutate(dyads,
                Language_c = factor(Language),
                Condition_c = factor(Condition))

# Deviation code with Catalan and Friend as reference level (which is the multiplication by -1)
# contr.sum(2) assigns +1/-1; multiplying by -1 flips the signs so the
# alphabetically first level (Catalan / Friend) gets the negative code,
# and dividing by 2 scales the contrast to -0.5/+0.5 so each coefficient
# is a full between-level difference.

contrasts(dyads$Language_c) <- (contr.sum(2) * -1) / 2
contrasts(dyads$Condition_c) <- (contr.sum(2) * -1) / 2

Durations should be log-transformed:

# Log duration serves as the exposure offset in the count model below.
dyads <- dyads %>%
  mutate(LogDur = log(Seconds))

Fit Bayesian model:

# Negative binomial regression of gesture counts on language, condition,
# and their interaction, with log duration as an exposure offset (so the
# model effectively estimates gesture rates) and by-dyad random
# intercepts plus random condition slopes. init = 0 starts all chains at
# zero — presumably to avoid bad initial values with the offset; confirm.
freq_mdl <- brm(Total_Freq ~ Language_c * Condition_c +
                  offset(LogDur) +
                  (1 + Condition_c|ID),
      data = dyads, family = 'negbinomial',
      prior = my_priors,
      control = my_controls,
      seed = 42,
      init = 0, chains = 4, iter = my_iter, warmup = my_warmup)
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -mmacosx-version-min=10.13 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DBOOST_NO_AUTO_PTR  -include '/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/usr/local/include   -fPIC  -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1

Model summary:

summary(freq_mdl)
##  Family: negbinomial 
##   Links: mu = log; shape = identity 
## Formula: Total_Freq ~ Language_c * Condition_c + offset(LogDur) + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.38      0.07     0.27     0.53 1.00     2343
## sd(Condition_c1)                0.11      0.07     0.01     0.27 1.00     3077
## cor(Intercept,Condition_c1)     0.43      0.45    -0.73     0.98 1.00     7278
##                             Tail_ESS
## sd(Intercept)                   3740
## sd(Condition_c1)                3308
## cor(Intercept,Condition_c1)     4806
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -1.03      0.08    -1.19    -0.87 1.00     1957
## Language_c1                 -0.08      0.16    -0.39     0.23 1.00     1832
## Condition_c1                -0.12      0.06    -0.24    -0.00 1.00     8233
## Language_c1:Condition_c1    -0.17      0.12    -0.41     0.06 1.00     7652
##                          Tail_ESS
## Intercept                    2964
## Language_c1                  2630
## Condition_c1                 6033
## Language_c1:Condition_c1     5888
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## shape    95.84     68.11    23.69   283.36 1.00     3295     3826
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

What does the interaction mean? For this the sum-codes need to be checked:

contrasts(dyads$Language_c)
##         [,1]
## Catalan -0.5
## Korean   0.5
contrasts(dyads$Condition_c)
##          [,1]
## Friend   -0.5
## Superior  0.5

Get the posterior probability of the interaction and language effects:

posts <- posterior_samples(freq_mdl)

Check whether the condition effect is above zero:

sum(posts$b_Condition_c1 > 0) / nrow(posts)
## [1] 0.023375

Check whether the interaction is above zero:

sum(posts$`b_Language_c1:Condition_c1` > 0) / nrow(posts)
## [1] 0.075625

Posterior predictive checks to see whether our model could’ve predicted the data.

pp_check(freq_mdl, nsamples = 100)

Looks good.

Analysis: Gesture size

Check frequency of vertical and lateral large gestures… which needs to be divided by the total frequency (since the large gestures are a subset of all gestures). If we don’t calculate proportions, then the result would just be rehashing the total frequency result, as we know that overall gesture rates are lower for superior.

# Helper: per-dyad proportion of "big" gestures along one axis, relative
# to total gesture frequency, in each condition, plus the
# Superior - Friend difference. Replaces three copy-pasted pipelines that
# differed only in the column name.
make_size_props <- function(data, var) {
  sup_col <- str_c(var, '_Superior')
  fr_col <- str_c(var, '_Friend')
  out <- data %>%
    select(Language, ID, Condition, all_of(var), Total_Freq) %>%
    pivot_wider(names_from = Condition,
                values_from = c(all_of(var), Total_Freq))
  # Convert raw counts to proportions of total gestures, per condition:
  out[[sup_col]] <- out[[sup_col]] / out$Total_Freq_Superior
  out[[fr_col]] <- out[[fr_col]] / out$Total_Freq_Friend
  out$PoliteDiff <- out[[sup_col]] - out[[fr_col]]
  out
}

vertical <- make_size_props(dyads, 'Vertical_Big')

lateral <- make_size_props(dyads, 'Lateral_Big')

sagittal <- make_size_props(dyads, 'Sagittal_Big')

# Check:

vertical %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Vertical_Big_Fr… Vertical_Big_Su… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…            0.639           0.769                61
##  2 Catalan  Cata…            0.806           0.775                31
##  3 Catalan  Cata…            0.901           0.918                81
##  4 Catalan  Cata…            0.312           0.143                32
##  5 Catalan  Cata…            0.821           0.906                39
##  6 Catalan  Cata…            0.743           0.667                35
##  7 Catalan  Cata…            0.795           0.699                88
##  8 Catalan  Cata…            0.627           0.617                59
##  9 Catalan  Cata…            0.364           0.385                22
## 10 Catalan  Cata…            0.754           0.575                61
## 11 Catalan  Cata…            0.948           0.843                58
## 12 Catalan  Cata…            0.723           0.634               101
## 13 Catalan  Cata…            0.875           0.838                56
## 14 Catalan  Cata…            0.6             0.837                40
## 15 Korean   Kore…            0.824           0.934                91
## 16 Korean   Kore…            0.323           0.0270               62
## 17 Korean   Kore…            0.753           0.939                89
## 18 Korean   Kore…            0.933           0.9                  30
## 19 Korean   Kore…            0.727           0.545                33
## 20 Korean   Kore…            0.962           0.692                26
## 21 Korean   Kore…            0.825           0.919                63
## 22 Korean   Kore…            0.863           0.407                51
## 23 Korean   Kore…            0.985           0.673                67
## 24 Korean   Kore…            0.836           0.967                61
## 25 Korean   Kore…            0.871           0.875                31
## 26 Korean   Kore…            0.841           0.810                63
## 27 Korean   Kore…            0.636           0.4                  22
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>
lateral %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Lateral_Big_Fri… Lateral_Big_Sup… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…           0.311            0.167                61
##  2 Catalan  Cata…           0.419            0.4                  31
##  3 Catalan  Cata…           0.346            0.388                81
##  4 Catalan  Cata…           0.0938           0.0408               32
##  5 Catalan  Cata…           0.410            0.281                39
##  6 Catalan  Cata…           0.2              0.333                35
##  7 Catalan  Cata…           0.534            0.411                88
##  8 Catalan  Cata…           0.237            0.128                59
##  9 Catalan  Cata…           0.0909           0.0769               22
## 10 Catalan  Cata…           0.197            0.0959               61
## 11 Catalan  Cata…           0.414            0.275                58
## 12 Catalan  Cata…           0.327            0.378               101
## 13 Catalan  Cata…           0.357            0.257                56
## 14 Catalan  Cata…           0.475            0.442                40
## 15 Korean   Kore…           0.330            0.211                91
## 16 Korean   Kore…           0.0806           0                    62
## 17 Korean   Kore…           0.315            0.306                89
## 18 Korean   Kore…           0.433            0.367                30
## 19 Korean   Kore…           0.0909           0                    33
## 20 Korean   Kore…           0.0385           0                    26
## 21 Korean   Kore…           0.508            0.548                63
## 22 Korean   Kore…           0.275            0.0370               51
## 23 Korean   Kore…           0.224            0.204                67
## 24 Korean   Kore…           0.541            0.393                61
## 25 Korean   Kore…           0.0645           0                    31
## 26 Korean   Kore…           0.238            0.0714               63
## 27 Korean   Kore…           0.545            0                    22
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>
sagittal %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Sagittal_Big_Fr… Sagittal_Big_Su… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…           0                     0               61
##  2 Catalan  Cata…           0                     0               31
##  3 Catalan  Cata…           0.0123                0               81
##  4 Catalan  Cata…           0                     0               32
##  5 Catalan  Cata…           0                     0               39
##  6 Catalan  Cata…           0                     0               35
##  7 Catalan  Cata…           0                     0               88
##  8 Catalan  Cata…           0                     0               59
##  9 Catalan  Cata…           0                     0               22
## 10 Catalan  Cata…           0                     0               61
## 11 Catalan  Cata…           0.0172                0               58
## 12 Catalan  Cata…           0.0198                0              101
## 13 Catalan  Cata…           0                     0               56
## 14 Catalan  Cata…           0.15                  0               40
## 15 Korean   Kore…           0                     0               91
## 16 Korean   Kore…           0                     0               62
## 17 Korean   Kore…           0.0112                0               89
## 18 Korean   Kore…           0                     0               30
## 19 Korean   Kore…           0                     0               33
## 20 Korean   Kore…           0                     0               26
## 21 Korean   Kore…           0                     0               63
## 22 Korean   Kore…           0.0196                0               51
## 23 Korean   Kore…           0                     0               67
## 24 Korean   Kore…           0                     0               61
## 25 Korean   Kore…           0                     0               31
## 26 Korean   Kore…           0                     0               63
## 27 Korean   Kore…           0                     0               22
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>

For plotting, create long format data frames:

# Long format for plotting: one row per dyad and condition with the
# proportion of large lateral gestures.
lateral_avg <- lateral %>%
  select(Language:Lateral_Big_Superior) %>%
  pivot_longer(cols = Lateral_Big_Friend:Lateral_Big_Superior,
               names_to = 'Condition',
               values_to = 'Proportion') %>%
  mutate(Condition = if_else(Condition == 'Lateral_Big_Friend',
                             'Friend', 'Superior'))

Create the difference labels:

# Compute differences:

# diff() gives Superior minus Friend because the long format keeps the
# Friend row before the Superior row within each ID.
diffs <- lateral_avg %>% group_by(ID) %>% 
  summarize(diff = diff(Proportion)) %>% 
  mutate(trend = ifelse(diff < 0, 'down', 'not down'))

# Add to average data frame (explicit key silences the join message):

lateral_avg <- left_join(lateral_avg, diffs, by = 'ID')
## Joining, by = "ID"

New ggplots for this:

# Spaghetti plot of per-dyad large-lateral-gesture proportions, one line
# per dyad across conditions, faceted by language; black lines mark dyads
# whose proportion dropped for the superior.
lateral_p <- lateral_avg %>% ggplot(aes(x = Condition, y = Proportion,
                       group = ID,
                       fill = Condition)) +
  geom_line(aes(col = trend)) +
  # 'down' = black, 'not down' = grey (discrete levels in alphabetical order)
  scale_color_manual(values = c('black', 'grey')) +
  geom_point(size = 3, shape = 21,
             alpha = 0.85) +
  # colorblind-friendly orange/blue fills for the two conditions
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  xlab('') +
  ylab('Proportion of\nlarge gestures (lateral)') +
  theme_minimal() +
  theme(legend.position = 'none') +
  theme(axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16)) +
  facet_wrap(~Language)

# Save:

# Render in the document:
lateral_p

# Write a 6 x 4 inch PDF:
ggsave(plot = lateral_p, filename = '../figures/lateral_gestures.pdf',
       width = 6, height = 4)

For plotting, create long format data frames:

# Long format (one row per participant x condition) for plotting:
sagittal_avg <- sagittal %>%
  select(Language:Sagittal_Big_Superior) %>%
  pivot_longer(Sagittal_Big_Friend:Sagittal_Big_Superior,
               names_to = 'Condition', values_to = 'Proportion') %>%
  mutate(Condition = if_else(Condition == 'Sagittal_Big_Friend',
                             'Friend', 'Superior'))

Create the difference labels:

# Compute differences:

# Per-ID condition difference (Superior minus Friend). Indexing by the
# Condition label avoids diff()'s hidden assumption that rows within each
# ID are ordered Friend-then-Superior.
diffs <- sagittal_avg %>% group_by(ID) %>% 
  summarize(diff = Proportion[Condition == 'Superior'] -
              Proportion[Condition == 'Friend']) %>% 
  mutate(trend = ifelse(diff < 0, 'down', 'not down'))

# Add to average data frame (join key spelled out instead of relying on
# left_join's automatic "Joining, by = ..." detection):

sagittal_avg <- left_join(sagittal_avg, diffs, by = 'ID')
## Joining, by = "ID"

New ggplots for this:

# Spaghetti plot: each participant's proportion in both conditions,
# faceted by language; lines for downward trends are drawn in black.
sagittal_p <- sagittal_avg %>%
  ggplot(aes(x = Condition, y = Proportion, group = ID, fill = Condition)) +
  geom_line(aes(col = trend)) +
  geom_point(size = 3, shape = 21, alpha = 0.85) +
  scale_color_manual(values = c('black', 'grey')) +
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  labs(x = '', y = 'Proportion of\nlarge gestures (sagittal)') +
  facet_wrap(~Language) +
  theme_minimal() +
  theme(legend.position = 'none',
        axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16))

# Save:

# Render in the document:
sagittal_p

# Write a 6 x 4 inch PDF:
ggsave(plot = sagittal_p, filename = '../figures/sagittal_gestures.pdf',
       width = 6, height = 4)

For plotting, create long format data frames:

# Long format (one row per participant x condition) for plotting:
vertical_avg <- vertical %>%
  select(Language:Vertical_Big_Superior) %>%
  pivot_longer(Vertical_Big_Friend:Vertical_Big_Superior,
               names_to = 'Condition', values_to = 'Proportion') %>%
  mutate(Condition = if_else(Condition == 'Vertical_Big_Friend',
                             'Friend', 'Superior'))

Create the difference labels:

# Compute differences:

# Per-ID condition difference (Superior minus Friend). Indexing by the
# Condition label avoids diff()'s hidden assumption that rows within each
# ID are ordered Friend-then-Superior.
diffs <- vertical_avg %>% group_by(ID) %>% 
  summarize(diff = Proportion[Condition == 'Superior'] -
              Proportion[Condition == 'Friend']) %>% 
  mutate(trend = ifelse(diff < 0, 'down', 'not down'))

# Add to average data frame (join key spelled out instead of relying on
# left_join's automatic "Joining, by = ..." detection):

vertical_avg <- left_join(vertical_avg, diffs, by = 'ID')
## Joining, by = "ID"

New ggplots for this:

# Spaghetti plot: each participant's proportion in both conditions,
# faceted by language; lines for downward trends are drawn in black.
vertical_p <- vertical_avg %>%
  ggplot(aes(x = Condition, y = Proportion, group = ID, fill = Condition)) +
  geom_line(aes(col = trend)) +
  geom_point(size = 3, shape = 21, alpha = 0.85) +
  scale_color_manual(values = c('black', 'grey')) +
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  labs(x = '', y = 'Proportion of\nlarge gestures (vertical)') +
  facet_wrap(~Language) +
  theme_minimal() +
  theme(legend.position = 'none',
        axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16))

# Save:

# Render in the document:
vertical_p

# Write a 6 x 4 inch PDF:
ggsave(plot = vertical_p, filename = '../figures/vertical_gestures.pdf',
       width = 6, height = 4)

Very consistent picture where Koreans always have fewer gestures in the superior condition. Compute for how many this is:

# Classify each participant's Superior-minus-Friend difference.
# case_when() does this in one typed step; the original chained ifelse()
# calls coerced PoliteDiff to character and then compared those strings
# against 0, which only worked by accident of string ordering.
vertical <- vertical %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))
lateral <- lateral %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))
sagittal <- sagittal %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

# Participants per language in each politeness category (vertical):
vertical %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     9
## 2 Catalan  more polite     5
## 3 Korean   less polite     8
## 4 Korean   more polite     5
# Same tabulation for lateral gestures:
lateral %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    11
## 2 Catalan  more polite     3
## 3 Korean   less polite    12
## 4 Korean   more polite     1
sagittal %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     4
## 2 Catalan  same           10
## 3 Korean   less polite     2
## 4 Korean   same           11

Create a model for big gestures — can’t do this because the denominator isn’t right:

# Binomial mixed model: big vertical gesture count out of the per-dyad
# gesture total, with Language x Condition fixed effects (the _c suffix
# presumably marks centered/contrast-coded versions set up earlier —
# TODO confirm) and by-participant random intercepts and condition slopes.
# Priors, sampler controls, and iteration counts are defined elsewhere.
vertical_mdl <- brm(Vertical_Big | trials(Total_Freq) ~ Language_c * Condition_c +
                 (1 + Condition_c|ID),
               data = dyads, family = binomial,
               prior = my_priors, control = my_controls,
               seed = 42,
               init = 0, chains = 4, iter = my_iter, warmup = my_warmup)
## Compiling Stan program...
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -mmacosx-version-min=10.13 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DBOOST_NO_AUTO_PTR  -include '/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/usr/local/include   -fPIC  -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
## Start sampling
# Same binomial mixed model, now for big lateral gestures out of the
# per-dyad gesture total:
lateral_mdl <- brm(Lateral_Big | trials(Total_Freq) ~ Language_c * Condition_c +
                 (1 + Condition_c|ID),
               data = dyads, family = binomial,
               prior = my_priors, control = my_controls,
               seed = 42,
               init = 0, chains = 4, iter = my_iter, warmup = my_warmup)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -mmacosx-version-min=10.13 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DBOOST_NO_AUTO_PTR  -include '/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/usr/local/include   -fPIC  -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
## Start sampling
# Same binomial mixed model, now for big sagittal gestures out of the
# per-dyad gesture total:
sagittal_mdl <- brm(Sagittal_Big | trials(Total_Freq) ~ Language_c * Condition_c +
                 (1 + Condition_c|ID),
               data = dyads, family = binomial,
               prior = my_priors, control = my_controls,
               seed = 42,
               init = 0, chains = 4, iter = my_iter, warmup = my_warmup)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -mmacosx-version-min=10.13 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.0/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DBOOST_NO_AUTO_PTR  -include '/Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/usr/local/include   -fPIC  -Wall -g -O2  -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
##                ^
##                ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.0/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
##          ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
## Start sampling

Summarize model:

# Fixed effects, group-level SDs/correlations, and Rhat/ESS diagnostics:
summary(vertical_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Vertical_Big | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   1.14      0.19     0.84     1.57 1.00     2223
## sd(Condition_c1)                1.07      0.22     0.70     1.56 1.00     2856
## cor(Intercept,Condition_c1)     0.45      0.21    -0.02     0.79 1.00     2821
##                             Tail_ESS
## sd(Intercept)                   3435
## sd(Condition_c1)                5088
## cor(Intercept,Condition_c1)     3928
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                    1.16      0.24     0.68     1.62 1.00     1207
## Language_c1                  0.37      0.43    -0.49     1.23 1.00     1388
## Condition_c1                -0.28      0.24    -0.77     0.19 1.00     2053
## Language_c1:Condition_c1    -0.28      0.47    -1.19     0.63 1.00     2605
##                          Tail_ESS
## Intercept                    2065
## Language_c1                  2742
## Condition_c1                 3332
## Language_c1:Condition_c1     4366
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Same summary for the lateral model:
summary(lateral_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Lateral_Big | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   1.01      0.18     0.72     1.43 1.00     2244
## sd(Condition_c1)                0.42      0.20     0.07     0.85 1.00     2488
## cor(Intercept,Condition_c1)     0.69      0.29    -0.11     0.99 1.00     3861
##                             Tail_ESS
## sd(Intercept)                   3776
## sd(Condition_c1)                2476
## cor(Intercept,Condition_c1)     3739
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -1.27      0.21    -1.69    -0.86 1.00     1202
## Language_c1                 -0.49      0.41    -1.32     0.29 1.00     1278
## Condition_c1                -0.57      0.16    -0.91    -0.30 1.00     2755
## Language_c1:Condition_c1    -0.38      0.28    -1.00     0.12 1.00     3183
##                          Tail_ESS
## Intercept                    2296
## Language_c1                  2583
## Condition_c1                 3685
## Language_c1:Condition_c1     3974
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Same summary for the sagittal model:
summary(sagittal_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Sagittal_Big | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   1.39      0.85     0.10     3.37 1.00     2314
## sd(Condition_c1)                2.02      1.36     0.11     5.16 1.00     3144
## cor(Intercept,Condition_c1)    -0.29      0.51    -0.98     0.82 1.00     3438
##                             Tail_ESS
## sd(Intercept)                   2325
## sd(Condition_c1)                4587
## cor(Intercept,Condition_c1)     3154
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -7.96      1.06   -10.43    -6.30 1.00     5711
## Language_c1                 -0.76      1.15    -3.12     1.51 1.00     8288
## Condition_c1                -2.35      1.30    -4.96     0.09 1.00     8281
## Language_c1:Condition_c1     0.81      1.64    -2.43     4.05 1.00    10675
##                          Tail_ESS
## Intercept                    4302
## Language_c1                  5684
## Condition_c1                 6497
## Language_c1:Condition_c1     5799
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior samples of the vertical, lateral, and sagittal models:

# Extract posterior draws as data frames (one row per draw).
# NOTE(review): posterior_samples() is deprecated in later brms releases
# in favor of as_draws_df(); fine with the brms 2.14.4 used here.
vertical_posts <- posterior_samples(vertical_mdl)
lateral_posts <- posterior_samples(lateral_mdl)
sagittal_posts <- posterior_samples(sagittal_mdl)

Posterior probability of the condition effect being above 0:

# P(condition effect > 0) = share of posterior draws above zero;
# mean() of a logical equals the original sum()/nrow() ratio.
mean(vertical_posts$b_Condition_c1 > 0)
## [1] 0.119125
# Same posterior probability for the lateral model (mean of a logical
# equals the original sum()/nrow() ratio):
mean(lateral_posts$b_Condition_c1 > 0)
## [1] 0
# Same posterior probability for the sagittal model (mean of a logical
# equals the original sum()/nrow() ratio):
mean(sagittal_posts$b_Condition_c1 > 0)
## [1] 0.029

Posterior probability of the interaction effect being above 0:

# P(interaction > 0) via mean of a logical (equals sum()/nrow()):
mean(vertical_posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.27075
# Same interaction probability for the lateral model:
mean(lateral_posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.07125
# Same interaction probability for the sagittal model:
mean(sagittal_posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.694875

Posterior predictive checks to see whether our model could’ve predicted the data, vertical:

# Overlay 100 posterior predictive draws on the observed distribution:
pp_check(vertical_mdl, nsamples = 100)

Lateral:

# Posterior predictive check for the lateral model:
pp_check(lateral_mdl, nsamples = 100)

Sagittal:

# Posterior predictive check for the sagittal model:
pp_check(sagittal_mdl, nsamples = 100)

Analysis: Use of two-handed gestures

Overall number of gestures per condition:

# Two-handed gesture totals per condition, with proportions and a
# percentage label:
dyads %>%
  group_by(Condition) %>%
  summarize(Freq = sum(Both_Hands)) %>%
  mutate(Prop = round(Freq / sum(Freq), 2),
         Percentage = str_c(Prop * 100, '%'))
## # A tibble: 2 x 4
##   Condition  Freq  Prop Percentage
## * <chr>     <dbl> <dbl> <chr>     
## 1 Friend      659  0.53 53%       
## 2 Superior    578  0.47 47%

Overall number of gestures per condition per language:

# Tally by language and condition:

# Two-handed gesture totals per language x condition cell:
freq_count <- dyads %>% group_by(Language, Condition) %>% 
  summarize(Freq = sum(Both_Hands))
## `summarise()` has grouped output by 'Language'. You can override using the `.groups` argument.
# Take sums by language for calculating proportions, and calculate them.
# The join key is spelled out so the result does not depend on
# right_join's automatic key detection.
freq_count <- freq_count %>% group_by(Language) %>% 
  summarize(Total = sum(Freq)) %>%
  right_join(freq_count, by = 'Language') %>% 
  mutate(Prop = Freq / Total,
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## Joining, by = "Language"
# Check:

# Print the per-language proportions:
freq_count
## # A tibble: 4 x 6
##   Language Total Condition  Freq  Prop Percentage
##   <chr>    <dbl> <chr>     <dbl> <dbl> <chr>     
## 1 Catalan    677 Friend      364  0.54 54%       
## 2 Catalan    677 Superior    313  0.46 46%       
## 3 Korean     560 Friend      295  0.53 53%       
## 4 Korean     560 Superior    265  0.47 47%

Check frequency of two-handed gestures:

# Wide format per participant: two-handed counts converted to proportions
# of each condition's gesture total, plus the Superior-minus-Friend
# difference.
both <- dyads %>%
  select(Language, ID, Condition, Both_Hands, Total_Freq) %>%
  pivot_wider(names_from = Condition,
              values_from = c(Both_Hands, Total_Freq)) %>%
  mutate(Both_Hands_Friend = Both_Hands_Friend / Total_Freq_Friend,
         Both_Hands_Superior = Both_Hands_Superior / Total_Freq_Superior,
         PoliteDiff = Both_Hands_Superior - Both_Hands_Friend)

# Check:

# Print all 27 participants:
both %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Both_Hands_Frie… Both_Hands_Supe… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…           0.590            0.628                61
##  2 Catalan  Cata…           0.516            0.425                31
##  3 Catalan  Cata…           0.346            0.143                81
##  4 Catalan  Cata…           0.531            0.286                32
##  5 Catalan  Cata…           0.410            0.0938               39
##  6 Catalan  Cata…           0.229            0.1                  35
##  7 Catalan  Cata…           0.489            0.589                88
##  8 Catalan  Cata…           0.542            0.681                59
##  9 Catalan  Cata…           0                0                    22
## 10 Catalan  Cata…           0.541            0.767                61
## 11 Catalan  Cata…           0.741            0.706                58
## 12 Catalan  Cata…           0.653            0.415               101
## 13 Catalan  Cata…           0.25             0.230                56
## 14 Catalan  Cata…           0.3              0.0465               40
## 15 Korean   Kore…           0.473            0.487                91
## 16 Korean   Kore…           0.5              0.595                62
## 17 Korean   Kore…           0.483            0.653                89
## 18 Korean   Kore…           0.467            0.767                30
## 19 Korean   Kore…           0.0303           0                    33
## 20 Korean   Kore…           0.462            0.462                26
## 21 Korean   Kore…           0.524            0.435                63
## 22 Korean   Kore…           0.373            0.556                51
## 23 Korean   Kore…           0.224            0.633                67
## 24 Korean   Kore…           0.426            0.508                61
## 25 Korean   Kore…           0.129            0                    31
## 26 Korean   Kore…           0.651            0.786                63
## 27 Korean   Kore…           0.591            0.8                  22
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>

Very consistent picture where two-handed gestures are used less in the superior context. Compute for how many this is:

# Classify each participant's Superior-minus-Friend difference.
# case_when() does this in one typed step; the original chained ifelse()
# calls coerced PoliteDiff to character and then compared those strings
# against 0, which only worked by accident of string ordering.
both <- both %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

# Participants per language in each politeness category:
both %>% count(Language, Category)
## # A tibble: 6 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     9
## 2 Catalan  more polite     4
## 3 Catalan  same            1
## 4 Korean   less polite     3
## 5 Korean   more polite     9
## 6 Korean   same            1

Create a data visualization of this:

# Long format (one row per participant x condition) for plotting:
both_avg <- both %>%
  select(Language:Both_Hands_Superior) %>%
  pivot_longer(Both_Hands_Friend:Both_Hands_Superior,
               names_to = 'Condition', values_to = 'Proportion') %>%
  mutate(Condition = if_else(Condition == 'Both_Hands_Friend',
                             'Friend', 'Superior'))

Create the difference labels:

# Compute differences:

# Per-ID condition difference (Superior minus Friend). Indexing by the
# Condition label avoids diff()'s hidden assumption that rows within each
# ID are ordered Friend-then-Superior.
diffs <- both_avg %>% group_by(ID) %>% 
  summarize(diff = Proportion[Condition == 'Superior'] -
              Proportion[Condition == 'Friend']) %>% 
  mutate(trend = ifelse(diff < 0, 'down', 'not down'))

# Add to average data frame (join key spelled out instead of relying on
# left_join's automatic "Joining, by = ..." detection):

both_avg <- left_join(both_avg, diffs, by = 'ID')
## Joining, by = "ID"

New ggplots for this:

# Spaghetti plot: each participant's proportion in both conditions,
# faceted by language; lines for downward trends are drawn in black.
both_p <- both_avg %>%
  ggplot(aes(x = Condition, y = Proportion, group = ID, fill = Condition)) +
  geom_line(aes(col = trend)) +
  geom_point(size = 3, shape = 21, alpha = 0.85) +
  scale_color_manual(values = c('black', 'grey')) +
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  labs(x = '', y = 'Proportion of\ntwo-handed gestures') +
  facet_wrap(~Language) +
  theme_minimal() +
  theme(legend.position = 'none',
        axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16))

# Save:

# Render in the document:
both_p

# Write a 6 x 4 inch PDF:
ggsave(plot = both_p, filename = '../figures/both_gestures.pdf',
       width = 6, height = 4)

Create a model for two-handed gestures:

# Binomial mixed model: two-handed gesture count out of the per-dyad
# gesture total, with Language x Condition fixed effects and
# by-participant random intercepts and condition slopes. Priors, sampler
# controls, and iteration counts are defined elsewhere.
both_mdl <- brm(Both_Hands | trials(Total_Freq) ~ Language_c * Condition_c +
                  (1 + Condition_c|ID),
                data = dyads, family = binomial,
                prior = my_priors, control = my_controls,
                seed = 42,
                init = 0, chains = 4, iter = my_iter, warmup = my_warmup)

Summarize model:

# Fixed effects, group-level SDs/correlations, and Rhat/ESS diagnostics:
summary(both_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Both_Hands | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   1.26      0.23     0.88     1.78 1.00     1732
## sd(Condition_c1)                0.85      0.21     0.50     1.33 1.00     3150
## cor(Intercept,Condition_c1)     0.71      0.18     0.24     0.94 1.00     3152
##                             Tail_ESS
## sd(Intercept)                   3495
## sd(Condition_c1)                4683
## cor(Intercept,Condition_c1)     4785
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -0.43      0.25    -0.94     0.06 1.00     1523
## Language_c1                  0.25      0.48    -0.70     1.16 1.00     1439
## Condition_c1                -0.08      0.21    -0.51     0.30 1.00     2460
## Language_c1:Condition_c1     0.86      0.38     0.11     1.62 1.00     2554
##                          Tail_ESS
## Intercept                    2554
## Language_c1                  2762
## Condition_c1                 3385
## Language_c1:Condition_c1     4001
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior samples of the both_mdl:

# Posterior draws for the two-handed gesture model (one row per draw):
posts <- posterior_samples(both_mdl)

Posterior probability of the condition effect being above 0:

# P(condition effect > 0) via mean of a logical (equals sum()/nrow()):
mean(posts$b_Condition_c1 > 0)
## [1] 0.356875

Posterior probability of the interaction effect being above 0:

# P(interaction > 0) via mean of a logical (equals sum()/nrow()):
mean(posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.98625

Posterior predictive checks to see whether our model could’ve predicted the data:

# Overlay 100 posterior predictive draws on the observed distribution:
pp_check(both_mdl, nsamples = 100)

Analysis: Use of open-handed gestures

Overall number of gestures per condition:

# Open-handed gesture totals per condition, with proportions and a
# percentage label:
dyads %>%
  group_by(Condition) %>%
  summarize(Freq = sum(Shape_Open)) %>%
  mutate(Prop = round(Freq / sum(Freq), 2),
         Percentage = str_c(Prop * 100, '%'))
## # A tibble: 2 x 4
##   Condition  Freq  Prop Percentage
## * <chr>     <dbl> <dbl> <chr>     
## 1 Friend     1017  0.56 56%       
## 2 Superior    785  0.44 44%

Overall number of gestures per condition per language:

# Tally by language and condition:

# Open-handed gesture totals per language x condition cell:
freq_count <- dyads %>% group_by(Language, Condition) %>% 
  summarize(Freq = sum(Shape_Open))
## `summarise()` has grouped output by 'Language'. You can override using the `.groups` argument.
# Take sums by language for calculating proportions, and calculate them.
# The join key is spelled out so the result does not depend on
# right_join's automatic key detection.
freq_count <- freq_count %>% group_by(Language) %>% 
  summarize(Total = sum(Freq)) %>%
  right_join(freq_count, by = 'Language') %>% 
  mutate(Prop = Freq / Total,
         Prop = round(Prop, 2),
         Percentage = str_c(Prop * 100, '%'))
## Joining, by = "Language"
# Check:

# Print the per-language proportions:
freq_count
## # A tibble: 4 x 6
##   Language Total Condition  Freq  Prop Percentage
##   <chr>    <dbl> <chr>     <dbl> <dbl> <chr>     
## 1 Catalan   1056 Friend      603 0.570 57%       
## 2 Catalan   1056 Superior    453 0.43  43%       
## 3 Korean     746 Friend      414 0.55  55%       
## 4 Korean     746 Superior    332 0.45  45%

Check frequency of open-handed gestures per speaker:

# Wide format per participant: open-handed counts converted to
# proportions of each condition's gesture total, plus the
# Superior-minus-Friend difference.
open <- dyads %>%
  select(Language, ID, Condition, Shape_Open, Total_Freq) %>%
  pivot_wider(names_from = Condition,
              values_from = c(Shape_Open, Total_Freq)) %>%
  mutate(Shape_Open_Friend = Shape_Open_Friend / Total_Freq_Friend,
         Shape_Open_Superior = Shape_Open_Superior / Total_Freq_Superior,
         PoliteDiff = Shape_Open_Superior - Shape_Open_Friend)

# Check:

# Print all 27 participants:
open %>% print(n = Inf)
## # A tibble: 27 x 7
##    Language ID    Shape_Open_Frie… Shape_Open_Supe… Total_Freq_Frie…
##    <chr>    <chr>            <dbl>            <dbl>            <dbl>
##  1 Catalan  Cata…            0.656            0.769               61
##  2 Catalan  Cata…            0.903            0.5                 31
##  3 Catalan  Cata…            0.630            0.388               81
##  4 Catalan  Cata…            0.719            0.633               32
##  5 Catalan  Cata…            1                0.812               39
##  6 Catalan  Cata…            0.829            0.633               35
##  7 Catalan  Cata…            0.784            0.562               88
##  8 Catalan  Cata…            0.695            0.723               59
##  9 Catalan  Cata…            0.682            0.462               22
## 10 Catalan  Cata…            0.934            0.781               61
## 11 Catalan  Cata…            1                0.843               58
## 12 Catalan  Cata…            0.713            0.390              101
## 13 Catalan  Cata…            0.804            0.608               56
## 14 Catalan  Cata…            0.9              0.465               40
## 15 Korean   Kore…            0.582            0.539               91
## 16 Korean   Kore…            0.565            0.432               62
## 17 Korean   Kore…            0.438            0.857               89
## 18 Korean   Kore…            0.667            0.8                 30
## 19 Korean   Kore…            0.333            0.182               33
## 20 Korean   Kore…            0.577            0.538               26
## 21 Korean   Kore…            0.905            0.839               63
## 22 Korean   Kore…            0.588            0.852               51
## 23 Korean   Kore…            0.866            0.939               67
## 24 Korean   Kore…            0.689            0.639               61
## 25 Korean   Kore…            0.226            0.438               31
## 26 Korean   Kore…            0.556            0.643               63
## 27 Korean   Kore…            0.545            0.6                 22
## # … with 2 more variables: Total_Freq_Superior <dbl>, PoliteDiff <dbl>

Very consistent picture where open-handed gestures are used less in the superior context. Compute for how many speakers this is the case:

# Classify each speaker's shift. The original nested ifelse() chain
# coerced PoliteDiff to character and then compared strings to 0
# (locale-fragile); case_when() keeps the comparison numeric.
# NA PoliteDiff still yields NA, as before.
open <- open %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

open %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    12
## 2 Catalan  more polite     2
## 3 Korean   less polite     6
## 4 Korean   more polite     7

Create a data visualization of this:

# Long format for plotting: one row per speaker/condition with the
# open-handed proportion, and readable condition labels:
open_avg <- open %>%
  select(Language:Shape_Open_Superior) %>%
  pivot_longer(Shape_Open_Friend:Shape_Open_Superior,
               names_to = 'Condition', values_to = 'Proportion') %>%
  mutate(Condition = if_else(Condition == 'Shape_Open_Friend',
                             'Friend', 'Superior'))

Create the difference labels:

# Compute differences:

# diff() assumes each ID's rows are ordered Friend then Superior,
# so diff = Superior - Friend — TODO confirm this ordering holds
# after pivot_longer.
diffs <- open_avg %>%
  group_by(ID) %>%
  summarize(diff = diff(Proportion)) %>%
  mutate(trend = if_else(diff < 0, 'down', 'not down'))

# Add to average data frame (joins by ID):

open_avg <- left_join(open_avg, diffs)
## Joining, by = "ID"

New ggplots for this:

# Spaghetti plot: one line per speaker across conditions, faceted by
# language; downward trends drawn in black, others in grey.
open_p <- open_avg %>%
  ggplot(aes(x = Condition, y = Proportion, group = ID, fill = Condition)) +
  geom_line(aes(col = trend)) +
  scale_color_manual(values = c('black', 'grey')) +
  geom_point(size = 3, shape = 21, alpha = 0.85) +
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  labs(x = '', y = 'Proportion of\nopen-handed gestures') +
  theme_minimal() +
  theme(legend.position = 'none',
        axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16)) +
  facet_wrap(~Language)

# Display, then save to PDF:

open_p

ggsave(plot = open_p, filename = '../figures/open_handed_gestures.pdf',
       width = 6, height = 4)

Create a model for open-handed gestures:

# Binomial GLMM: open-handed gestures out of all gestures, predicted
# by (centered) language, condition, and their interaction, with
# by-speaker random slopes for condition. my_priors, my_controls,
# my_iter, and my_warmup are defined earlier in the script.
open_mdl <- brm(
  Shape_Open | trials(Total_Freq) ~ Language_c * Condition_c +
    (1 + Condition_c | ID),
  data = dyads, family = binomial,
  prior = my_priors, control = my_controls,
  chains = 4, iter = my_iter, warmup = my_warmup,
  init = 0, seed = 42
)

Summarize model:

summary(open_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Shape_Open | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.85      0.15     0.61     1.18 1.00     2004
## sd(Condition_c1)                0.76      0.17     0.47     1.16 1.00     3016
## cor(Intercept,Condition_c1)    -0.17      0.26    -0.64     0.37 1.00     3386
##                             Tail_ESS
## sd(Intercept)                   3363
## sd(Condition_c1)                4708
## cor(Intercept,Condition_c1)     5094
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                    0.82      0.17     0.48     1.17 1.00     1447
## Language_c1                 -0.48      0.35    -1.19     0.19 1.00     1446
## Condition_c1                -0.38      0.19    -0.76    -0.02 1.00     2929
## Language_c1:Condition_c1     1.39      0.36     0.67     2.10 1.00     2703
##                          Tail_ESS
## Intercept                    2763
## Language_c1                  2597
## Condition_c1                 3962
## Language_c1:Condition_c1     4193
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior probabilities of the condition and interaction effects for the open-handedness model:

posts <- posterior_samples(open_mdl)

Posterior probability of the condition effect being above 0:

sum(posts$b_Condition_c1 > 0) / nrow(posts)
## [1] 0.0205

Posterior probability of the interaction effect being above 0:

sum(posts$`b_Language_c1:Condition_c1` > 0) / nrow(posts)
## [1] 1

Posterior predictive checks to see whether our model could’ve predicted the data:

pp_check(open_mdl, nsamples = 100)

Analysis: path, manner, ground

What is the overall number of path, manner, and ground gestures?

# Grand totals of path, manner, and ground gestures:
dyads %>%
  summarize(across(c(Path, Manner, Ground), sum))
## # A tibble: 1 x 3
##    Path Manner Ground
##   <dbl>  <dbl>  <dbl>
## 1   425    342    119

Calculate rate of path/manner/ground encoding per total gestures:

# Per-row rates: path/manner/ground gestures out of all gestures:
dyads <- dyads %>%
  mutate(Path_p = Path / Total_Freq,
         Manner_p = Manner / Total_Freq,
         Ground_p = Ground / Total_Freq)

Calculate path change by speaker and whether it changes based on superiority:

# NOTE(review): this overwrites the `path` data frame read from CSV
# earlier; use a distinct name (e.g. path_change) if the raw data is
# needed again later.
path <- dyads %>%
  select(Language, ID, Condition, Path_p) %>%
  pivot_wider(names_from = Condition, values_from = Path_p) %>%
  mutate(PoliteDiff = Superior - Friend)

# Check:

path %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1  0.148    0.128    -0.0193 
##  2 Catalan  Catalan_2  0.258    0.075    -0.183  
##  3 Catalan  Catalan_3  0.0988   0.143     0.0441 
##  4 Catalan  Catalan_4  0.25     0.143    -0.107  
##  5 Catalan  Catalan_5  0.256    0.188    -0.0689 
##  6 Catalan  Catalan_6  0.257    0.267     0.00952
##  7 Catalan  Catalan_7  0.148    0.123    -0.0244 
##  8 Catalan  Catalan_8  0.153    0.128    -0.0249 
##  9 Catalan  Catalan_9  0.318    0.385     0.0664 
## 10 Catalan  Catalan_11 0.115    0.110    -0.00517
## 11 Catalan  Catalan_12 0.103    0.157     0.0534 
## 12 Catalan  Catalan_13 0.0990   0.0976   -0.00145
## 13 Catalan  Catalan_14 0.0893   0.149     0.0594 
## 14 Catalan  Catalan_16 0.225    0.209    -0.0157 
## 15 Korean   Korean_1   0.143    0.158     0.0150 
## 16 Korean   Korean_2   0.129    0.189     0.0602 
## 17 Korean   Korean_3   0.135    0.224     0.0897 
## 18 Korean   Korean_4   0.2      0.167    -0.0333 
## 19 Korean   Korean_5   0.212    0.455     0.242  
## 20 Korean   Korean_6   0.423    0.462     0.0385 
## 21 Korean   Korean_7   0.111    0.145     0.0341 
## 22 Korean   Korean_8   0.196    0.0370   -0.159  
## 23 Korean   Korean_9   0.179    0.184     0.00457
## 24 Korean   Korean_10  0.164    0.148    -0.0164 
## 25 Korean   Korean_11  0.355    0.312    -0.0423 
## 26 Korean   Korean_12  0.0794   0.119     0.0397 
## 27 Korean   Korean_13  0.227    0.1      -0.127
# Speaker changes overall:

# Classify each speaker's shift. The original nested ifelse() chain
# coerced PoliteDiff to character and then compared strings to 0
# (locale-fragile); case_when() keeps the comparison numeric.
path <- path %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

path %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     9
## 2 Catalan  more polite     5
## 3 Korean   less polite     5
## 4 Korean   more polite     8

Same for manner:

# Per-speaker manner rate in each condition plus the
# Superior-minus-Friend difference:
manner <- dyads %>%
  select(Language, ID, Condition, Manner_p) %>%
  pivot_wider(names_from = Condition, values_from = Manner_p) %>%
  mutate(PoliteDiff = Superior - Friend)

# Check:

manner %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1  0.148    0.0897  -0.0578  
##  2 Catalan  Catalan_2  0.194    0.1     -0.0935  
##  3 Catalan  Catalan_3  0.111    0.0408  -0.0703  
##  4 Catalan  Catalan_4  0.188    0.0408  -0.147   
##  5 Catalan  Catalan_5  0.256    0.188   -0.0689  
##  6 Catalan  Catalan_6  0.171    0.167   -0.00476 
##  7 Catalan  Catalan_7  0.0909   0.0822  -0.00872 
##  8 Catalan  Catalan_8  0.153    0.128   -0.0249  
##  9 Catalan  Catalan_9  0.273    0       -0.273   
## 10 Catalan  Catalan_11 0.148    0.0685  -0.0790  
## 11 Catalan  Catalan_12 0.155    0.118   -0.0375  
## 12 Catalan  Catalan_13 0.109    0.110    0.000845
## 13 Catalan  Catalan_14 0.0893   0.0676  -0.0217  
## 14 Catalan  Catalan_16 0.125    0.0698  -0.0552  
## 15 Korean   Korean_1   0.132    0.132   -0.000289
## 16 Korean   Korean_2   0.161    0.0811  -0.0802  
## 17 Korean   Korean_3   0.135    0.184    0.0488  
## 18 Korean   Korean_4   0.2      0.133   -0.0667  
## 19 Korean   Korean_5   0.152    0.0909  -0.0606  
## 20 Korean   Korean_6   0.346    0.308   -0.0385  
## 21 Korean   Korean_7   0.111    0.0806  -0.0305  
## 22 Korean   Korean_8   0.176    0.185    0.00871 
## 23 Korean   Korean_9   0.149    0.163    0.0140  
## 24 Korean   Korean_10  0.115    0.0984  -0.0164  
## 25 Korean   Korean_11  0.129    0.0625  -0.0665  
## 26 Korean   Korean_12  0.111    0.167    0.0556  
## 27 Korean   Korean_13  0.227    0.2     -0.0273
# Speaker changes overall:

# Classify each speaker's shift. The original nested ifelse() chain
# coerced PoliteDiff to character and then compared strings to 0
# (locale-fragile); case_when() keeps the comparison numeric.
manner <- manner %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

manner %>% count(Language, Category)
## # A tibble: 4 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    13
## 2 Catalan  more polite     1
## 3 Korean   less polite     9
## 4 Korean   more polite     4

Same for ground:

# Per-speaker ground rate in each condition plus the
# Superior-minus-Friend difference:
ground <- dyads %>%
  select(Language, ID, Condition, Ground_p) %>%
  pivot_wider(names_from = Condition, values_from = Ground_p) %>%
  mutate(PoliteDiff = Superior - Friend)

# Check:

ground %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID         Friend Superior PoliteDiff
##    <chr>    <chr>       <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1  0.0656   0.0769    0.0113 
##  2 Catalan  Catalan_2  0.0968   0.05     -0.0468 
##  3 Catalan  Catalan_3  0.0247   0        -0.0247 
##  4 Catalan  Catalan_4  0.0625   0.0204   -0.0421 
##  5 Catalan  Catalan_5  0.0513   0        -0.0513 
##  6 Catalan  Catalan_6  0.114    0.0667   -0.0476 
##  7 Catalan  Catalan_7  0.0455   0.0822    0.0367 
##  8 Catalan  Catalan_8  0.102    0        -0.102  
##  9 Catalan  Catalan_9  0.0455   0.0769    0.0315 
## 10 Catalan  Catalan_11 0.0984   0.0685   -0.0299 
## 11 Catalan  Catalan_12 0.0345   0.0784    0.0439 
## 12 Catalan  Catalan_13 0.0297   0.0244   -0.00531
## 13 Catalan  Catalan_14 0.0179   0.0270    0.00917
## 14 Catalan  Catalan_16 0        0         0      
## 15 Korean   Korean_1   0.0330   0.0263   -0.00665
## 16 Korean   Korean_2   0        0.0270    0.0270 
## 17 Korean   Korean_3   0.0449   0.0408   -0.00413
## 18 Korean   Korean_4   0.0667   0        -0.0667 
## 19 Korean   Korean_5   0        0         0      
## 20 Korean   Korean_6   0.115    0.0769   -0.0385 
## 21 Korean   Korean_7   0.0635   0.0645    0.00102
## 22 Korean   Korean_8   0.0588   0.0370   -0.0218 
## 23 Korean   Korean_9   0.0746   0.0408   -0.0338 
## 24 Korean   Korean_10  0.0328   0.0164   -0.0164 
## 25 Korean   Korean_11  0.0323   0        -0.0323 
## 26 Korean   Korean_12  0.0476   0.0476    0      
## 27 Korean   Korean_13  0.0909   0        -0.0909
# Speaker changes overall:

# Classify each speaker's shift. The original nested ifelse() chain
# coerced PoliteDiff to character and then compared strings to 0
# (locale-fragile); case_when() keeps the comparison numeric.
ground <- ground %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

ground %>% count(Language, Category)
## # A tibble: 6 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite     8
## 2 Catalan  more polite     5
## 3 Catalan  same            1
## 4 Korean   less polite     9
## 5 Korean   more polite     2
## 6 Korean   same            2

Data viz for manner:

For plotting, create long format data frames:

# Long format for plotting: one row per speaker/condition:
manner_avg <- manner %>%
  select(Language:Superior) %>%
  pivot_longer(Friend:Superior,
               names_to = 'Condition', values_to = 'Proportion')

Create the difference labels:

# Compute differences:

# diff() assumes each ID's rows are ordered Friend then Superior,
# so diff = Superior - Friend — TODO confirm this ordering holds
# after pivot_longer.
diffs <- manner_avg %>%
  group_by(ID) %>%
  summarize(diff = diff(Proportion)) %>%
  mutate(trend = if_else(diff < 0, 'down', 'not down'))

# Add to average data frame (joins by ID):

manner_avg <- left_join(manner_avg, diffs)
## Joining, by = "ID"

New ggplots for this:

# Spaghetti plot of manner-gesture proportions, one line per speaker,
# faceted by language; downward trends in black, others in grey.
manner_p <- manner_avg %>%
  ggplot(aes(x = Condition, y = Proportion, group = ID, fill = Condition)) +
  geom_line(aes(col = trend)) +
  scale_color_manual(values = c('black', 'grey')) +
  geom_point(size = 3, shape = 21, alpha = 0.85) +
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  labs(x = '', y = 'Proportion of\nmanner gestures') +
  theme_minimal() +
  theme(legend.position = 'none',
        axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16)) +
  facet_wrap(~Language)

# Display, then save to PDF:

manner_p

ggsave(plot = manner_p, filename = '../figures/manner_gestures.pdf',
       width = 6, height = 4)

Create models:

# Path:

# Binomial GLMMs for path, manner, and ground gestures out of all
# gestures; same fixed/random structure and sampler settings as the
# open-handedness model above.
path_mdl <- brm(
  Path | trials(Total_Freq) ~ Language_c * Condition_c +
    (1 + Condition_c | ID),
  data = dyads, family = binomial,
  prior = my_priors, control = my_controls,
  chains = 4, iter = my_iter, warmup = my_warmup,
  init = 0, seed = 42
)

# Manner:

manner_mdl <- brm(
  Manner | trials(Total_Freq) ~ Language_c * Condition_c +
    (1 + Condition_c | ID),
  data = dyads, family = binomial,
  prior = my_priors, control = my_controls,
  chains = 4, iter = my_iter, warmup = my_warmup,
  init = 0, seed = 42
)

# Ground:

ground_mdl <- brm(
  Ground | trials(Total_Freq) ~ Language_c * Condition_c +
    (1 + Condition_c | ID),
  data = dyads, family = binomial,
  prior = my_priors, control = my_controls,
  chains = 4, iter = my_iter, warmup = my_warmup,
  init = 0, seed = 42
)

Summarize models:

summary(path_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Path | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.38      0.10     0.20     0.61 1.00     2517
## sd(Condition_c1)                0.15      0.11     0.01     0.42 1.00     3764
## cor(Intercept,Condition_c1)    -0.18      0.55    -0.97     0.90 1.00     6818
##                             Tail_ESS
## sd(Intercept)                   4210
## sd(Condition_c1)                3298
## cor(Intercept,Condition_c1)     4634
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -1.60      0.09    -1.79    -1.41 1.00     3012
## Language_c1                  0.18      0.19    -0.19     0.56 1.00     3544
## Condition_c1                -0.01      0.12    -0.24     0.21 1.00     8798
## Language_c1:Condition_c1     0.16      0.23    -0.29     0.61 1.00     8383
##                          Tail_ESS
## Intercept                    4080
## Language_c1                  4795
## Condition_c1                 6010
## Language_c1:Condition_c1     4491
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
summary(manner_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Manner | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.15      0.10     0.01     0.37 1.00     2218
## sd(Condition_c1)                0.14      0.11     0.01     0.41 1.00     4735
## cor(Intercept,Condition_c1)     0.03      0.58    -0.94     0.95 1.00     6813
##                             Tail_ESS
## sd(Intercept)                   3874
## sd(Condition_c1)                4390
## cor(Intercept,Condition_c1)     5211
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -1.93      0.07    -2.07    -1.79 1.00     6638
## Language_c1                  0.26      0.14    -0.01     0.54 1.00     7064
## Condition_c1                -0.32      0.13    -0.57    -0.07 1.00     9681
## Language_c1:Condition_c1     0.40      0.25    -0.08     0.89 1.00     9854
##                          Tail_ESS
## Intercept                    5256
## Language_c1                  5720
## Condition_c1                 6042
## Language_c1:Condition_c1     6109
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
summary(ground_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Ground | trials(Total_Freq) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.33      0.18     0.03     0.72 1.00     1991
## sd(Condition_c1)                0.28      0.21     0.01     0.79 1.00     3664
## cor(Intercept,Condition_c1)     0.09      0.56    -0.93     0.95 1.00     7107
##                             Tail_ESS
## sd(Intercept)                   2956
## sd(Condition_c1)                4069
## cor(Intercept,Condition_c1)     5373
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -3.17      0.13    -3.44    -2.93 1.00     6348
## Language_c1                 -0.19      0.25    -0.68     0.31 1.00     6165
## Condition_c1                -0.32      0.22    -0.77     0.10 1.00     7840
## Language_c1:Condition_c1    -0.11      0.42    -0.96     0.72 1.00     8714
##                          Tail_ESS
## Intercept                    5542
## Language_c1                  5087
## Condition_c1                 5593
## Language_c1:Condition_c1     6157
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior probabilities of each:

# Posterior draws for each model (one row per post-warmup draw).
# NOTE(review): posterior_samples() is deprecated in recent brms;
# as_draws_df() is the modern replacement — confirm before upgrading.
path_posts <- posterior_samples(path_mdl)
manner_posts <- posterior_samples(manner_mdl)
ground_posts <- posterior_samples(ground_mdl)

Posterior probability of the condition effect being above 0:

# Share of posterior draws with a positive condition effect:
mean(path_posts$b_Condition_c1 > 0)
## [1] 0.45575
mean(manner_posts$b_Condition_c1 > 0)
## [1] 0.00575
mean(ground_posts$b_Condition_c1 > 0)
## [1] 0.068375

Posterior probability of the interaction effect being above 0:

# Share of posterior draws with a positive interaction effect:
mean(path_posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.761875
mean(manner_posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.947
mean(ground_posts$`b_Language_c1:Condition_c1` > 0)
## [1] 0.391125

Check the posterior predictive probabilities for each:

# Posterior predictive checks for each model (100 draws each):
pp_check(path_mdl, nsamples = 100)

pp_check(manner_mdl, nsamples = 100)

pp_check(ground_mdl, nsamples = 100)

Analysis: Viewpoint

Overall number of Character, Observer, and Dual:

# BUG FIX: the original computed Dual = sum(Observer), double-counting
# the Observer column; sum the Dual column itself. (The printed output
# below, where Dual == Observer == 300, reflects the old bug and should
# be regenerated.)
dyads %>% summarize(Character = sum(Character),
                    Observer = sum(Observer),
                    Dual = sum(Dual))
## # A tibble: 1 x 3
##   Character Observer  Dual
##       <dbl>    <dbl> <dbl>
## 1       199      300   300

Calculate difference in percentage between character and observer viewpoint:

# Character vs. observer viewpoint as shares of their combined total;
# Viewpoint_diff > 0 means more character than observer viewpoint:
dyads <- dyads %>%
  mutate(Character_p = Character / (Character + Observer),
         Observer_p = Observer / (Character + Observer),
         Viewpoint_diff = Character_p - Observer_p)

Do this for speakers to see if this changes for superior versus friend:

# Per-speaker viewpoint difference in each condition plus the
# Superior-minus-Friend change:
viewpoint <- dyads %>%
  select(Language, ID, Condition, Viewpoint_diff) %>%
  pivot_wider(names_from = Condition, values_from = Viewpoint_diff) %>%
  mutate(PoliteDiff = Superior - Friend)

# Check:

viewpoint %>% print(n = Inf)
## # A tibble: 27 x 5
##    Language ID          Friend Superior PoliteDiff
##    <chr>    <chr>        <dbl>    <dbl>      <dbl>
##  1 Catalan  Catalan_1   0.273    0.0909    -0.182 
##  2 Catalan  Catalan_2  -0.111    0.333      0.444 
##  3 Catalan  Catalan_3   0.200   -0.714     -0.914 
##  4 Catalan  Catalan_4  -0.333   -1         -0.667 
##  5 Catalan  Catalan_5  -0.167   -0.5       -0.333 
##  6 Catalan  Catalan_6  -0.818   -0.6        0.218 
##  7 Catalan  Catalan_7  -0.231   -0.455     -0.224 
##  8 Catalan  Catalan_8  -0.40    -0.40       0     
##  9 Catalan  Catalan_9  -0.5     -1         -0.5   
## 10 Catalan  Catalan_11 -0.6     -0.5        0.1   
## 11 Catalan  Catalan_12  0       -0.273     -0.273 
## 12 Catalan  Catalan_13  0.273    0         -0.273 
## 13 Catalan  Catalan_14  0.143   -0.286     -0.429 
## 14 Catalan  Catalan_16 -0.111   -0.6       -0.489 
## 15 Korean   Korean_1    0.143   -0.385     -0.527 
## 16 Korean   Korean_2    0.231   -0.667     -0.897 
## 17 Korean   Korean_3   -0.286   -0.0769     0.209 
## 18 Korean   Korean_4    0.333    0.200     -0.133 
## 19 Korean   Korean_5   -0.25    -1         -0.75  
## 20 Korean   Korean_6    0.0909   0         -0.0909
## 21 Korean   Korean_7    0.111   -0.40      -0.511 
## 22 Korean   Korean_8   -0.385   -0.200      0.185 
## 23 Korean   Korean_9    0.231   -0.5       -0.731 
## 24 Korean   Korean_10  -0.167   -0.40      -0.233 
## 25 Korean   Korean_11  -0.333   -0.6       -0.267 
## 26 Korean   Korean_12   0.5      0         -0.5   
## 27 Korean   Korean_13   0.6      1          0.400

Very consistent picture where character viewpoint gestures are used less in the superior context. Compute for how many speakers this is the case:

# Classify each speaker's shift. The original nested ifelse() chain
# coerced PoliteDiff to character and then compared strings to 0
# (locale-fragile); case_when() keeps the comparison numeric.
viewpoint <- viewpoint %>%
  mutate(Category = case_when(PoliteDiff > 0 ~ 'more polite',
                              PoliteDiff < 0 ~ 'less polite',
                              PoliteDiff == 0 ~ 'same'))

# Count:

viewpoint %>% count(Language, Category)
## # A tibble: 5 x 3
##   Language Category        n
##   <chr>    <chr>       <int>
## 1 Catalan  less polite    10
## 2 Catalan  more polite     3
## 3 Catalan  same            1
## 4 Korean   less polite    10
## 5 Korean   more polite     3

Create a model for viewpoint. We will represent this as a binomial problem since this is a choice of character viewpoint or observer viewpoint. For this we need a “trial” variable (N of the binomial distribution), which will be the sum of observer and character gestures:

# Binomial denominator: total of character + observer viewpoint gestures.
dyads <- dyads %>%
  mutate(Trial = Character + Observer)

For plotting, create data frame with by-participant data:

# By-participant proportion of character-viewpoint gestures per
# condition (one row per ID/condition, so mean() is over one value):
character_avg <- dyads %>%
  group_by(ID, Language, Condition) %>%
  summarize(Proportion = mean(Character / Trial))
## `summarise()` has grouped output by 'ID', 'Language'. You can override using the `.groups` argument.

Create the difference labels:

# Compute differences:

# diff() assumes each ID's rows are ordered Friend then Superior,
# so diff = Superior - Friend — TODO confirm this ordering holds
# after the grouped summarize.
diffs <- character_avg %>%
  group_by(ID) %>%
  summarize(diff = diff(Proportion)) %>%
  mutate(trend = if_else(diff < 0, 'down', 'not down'))

# Add to average data frame (joins by ID):

character_avg <- left_join(character_avg, diffs)
## Joining, by = "ID"

New ggplots for this:

# Spaghetti plot of character-viewpoint proportions, one line per
# speaker, faceted by language; downward trends in black, others grey.
character_p <- character_avg %>%
  ggplot(aes(x = Condition, y = Proportion, group = ID, fill = Condition)) +
  geom_line(aes(col = trend)) +
  scale_color_manual(values = c('black', 'grey')) +
  geom_point(size = 3, shape = 21, alpha = 0.85) +
  scale_fill_manual(values = c("#E69F00", "#0072B2")) +
  labs(x = '', y = 'Proportion of\ncharacter viewpoint gestures') +
  theme_minimal() +
  theme(legend.position = 'none',
        axis.title.y = element_text(margin = margin(t = 0, r = 16,
                                                    b = 0, l = 0),
                                    size = 16, face = 'bold'),
        axis.text.x = element_text(face = 'bold', size = 12),
        strip.text.x = element_text(face = 'bold', size = 16)) +
  facet_wrap(~Language)

# Display, then save to PDF:

character_p

ggsave(plot = character_p, filename = '../figures/character_VP.pdf',
       width = 6, height = 4)

Fit the model:

# Binomial GLMM: character-viewpoint gestures out of all viewpoint
# gestures (Trial = Character + Observer); same structure and sampler
# settings as the other models.
viewpoint_mdl <- brm(
  Character | trials(Trial) ~ Language_c * Condition_c +
    (1 + Condition_c | ID),
  data = dyads, family = binomial,
  prior = my_priors, control = my_controls,
  chains = 4, iter = my_iter, warmup = my_warmup,
  init = 0, seed = 42
)

Summarize model:

summary(viewpoint_mdl)
##  Family: binomial 
##   Links: mu = logit 
## Formula: Character | trials(Trial) ~ Language_c * Condition_c + (1 + Condition_c | ID) 
##    Data: dyads (Number of observations: 54) 
## Samples: 4 chains, each with iter = 6000; warmup = 4000; thin = 1;
##          total post-warmup samples = 8000
## 
## Group-Level Effects: 
## ~ID (Number of levels: 27) 
##                             Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept)                   0.36      0.18     0.04     0.73 1.00     2152
## sd(Condition_c1)                0.24      0.18     0.01     0.69 1.00     3895
## cor(Intercept,Condition_c1)     0.02      0.57    -0.94     0.95 1.00     6730
##                             Tail_ESS
## sd(Intercept)                   2882
## sd(Condition_c1)                2970
## cor(Intercept,Condition_c1)     4751
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -0.46      0.13    -0.70    -0.22 1.00     5834
## Language_c1                  0.36      0.25    -0.12     0.85 1.00     5712
## Condition_c1                -0.60      0.20    -1.01    -0.21 1.00     8565
## Language_c1:Condition_c1    -0.12      0.40    -0.90     0.64 1.00     9644
##                          Tail_ESS
## Intercept                    5580
## Language_c1                  5183
## Condition_c1                 5624
## Language_c1:Condition_c1     6083
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Get the posterior probabilities of the condition and interaction effects for the viewpoint model:

posts <- posterior_samples(viewpoint_mdl)

Posterior probability of the condition effect being above 0:

sum(posts$b_Condition_c1 > 0) / nrow(posts)
## [1] 0.00125

Posterior probability of the interaction effect being above 0:

sum(posts$`b_Language_c1:Condition_c1` > 0) / nrow(posts)
## [1] 0.383125

Posterior predictive checks to see whether our model could’ve predicted the data:

pp_check(viewpoint_mdl, nsamples = 100)

This completes this analysis.